# pip install opencv-python numpy matplotlib
import matplotlib.pyplot as plt
import cv2 as cv
import numpy as np
# Task: resize the cameraman.png image both up and down using the nearest
# neighbor method you have implemented. Perform at least 6 different resizings.
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Load the image in grayscale. Forward slashes are portable across OSes;
# the original 'Images\cameraman.png' contains the invalid escape '\c'.
img = cv2.imread('Images/cameraman.png', cv2.IMREAD_GRAYSCALE)
if img is None:
    # cv2.imread returns None on failure; fail fast with a clear message
    # instead of an AttributeError on img.shape below.
    raise FileNotFoundError("Images/cameraman.png not found")
original_h, original_w = img.shape
print(f"Original size: {original_w}x{original_h}")
plt.imshow(img, cmap='gray')
plt.title(f'Original Image ({original_w}x{original_h})')
plt.show()
def nearest_neighbor_resize(image, new_size):
    """
    Resize a 2-D grayscale image using nearest-neighbor interpolation.

    Parameters:
        image: 2-D numpy array (grayscale image).
        new_size: tuple (new_width, new_height).

    Returns:
        Resized array with the same dtype as the input.
    """
    old_h, old_w = image.shape
    new_w, new_h = new_size
    # Integer cross-multiplication replaces the original float-division
    # equality test, which is prone to rounding artifacts.
    if new_w * old_h != new_h * old_w:
        print("⚠️ Warning: Aspect ratio not preserved!")
    # Precompute the source index for each output row/column once instead
    # of recomputing it per pixel; integer floor division gives exactly
    # int(j * old_w / new_w) for non-negative sizes, without float error.
    src_cols = (np.arange(new_w) * old_w) // new_w
    src_rows = (np.arange(new_h) * old_h) // new_h
    # One vectorized fancy-indexing gather builds the whole output.
    resized = image[src_rows[:, None], src_cols[None, :]].astype(image.dtype)
    return resized

# Define new sizes (both upscaling and downscaling)
# Scale factors covering both shrinking and enlarging (>= 6 resizings):
# down 50%, down 20%, up 20%, up 50%, up 100%, down 70%.
scale_factors = [0.5, 0.8, 1.2, 1.5, 2.0, 0.3]
sizes = [(int(original_w * f), int(original_h * f)) for f in scale_factors]
# Resize with the hand-written nearest-neighbor routine.
nn_images = [nearest_neighbor_resize(img, sz) for sz in sizes]
# Show the original followed by every resized version on one figure.
plt.figure(figsize=(15, 5))
plt.subplot(2, 4, 1)
plt.imshow(img, cmap='gray')
plt.title(f'Original ({original_w}x{original_h})')
plt.axis('off')
for slot, out in enumerate(nn_images, start=2):
    plt.subplot(2, 4, slot)
    rows, cols = out.shape
    plt.imshow(out, cmap='gray')
    plt.title(f'{cols}x{rows}')
    plt.axis('off')
plt.tight_layout()
plt.show()

def bilinear_resize(image, new_size):
    """
    Resize a 2-D grayscale image using bilinear interpolation.

    Parameters:
        image: 2-D numpy array.
        new_size: tuple (new_width, new_height).

    Returns:
        Resized image as uint8.
    """
    old_h, old_w = image.shape
    new_w, new_h = new_size
    # Integer cross-multiplication avoids float-equality pitfalls.
    if new_w * old_h != new_h * old_w:
        print("⚠️ Warning: Aspect ratio not preserved!")
    # Accumulate in float so weighted sums are not truncated per element.
    resized = np.zeros((new_h, new_w), dtype=np.float64)
    for i in range(new_h):
        for j in range(new_w):
            # Guard the degenerate 1-pixel axes: the original divided by
            # (new_w - 1)/(new_h - 1), raising ZeroDivisionError when a
            # target dimension is 1.
            x = j * (old_w - 1) / (new_w - 1) if new_w > 1 else 0.0
            y = i * (old_h - 1) / (new_h - 1) if new_h > 1 else 0.0
            x0 = int(np.floor(x))
            x1 = min(x0 + 1, old_w - 1)
            y0 = int(np.floor(y))
            y1 = min(y0 + 1, old_h - 1)
            a = x - x0
            b = y - y0
            # Weighted average of the four surrounding pixels.
            resized[i, j] = ((1 - a) * (1 - b) * image[y0, x0]
                             + a * (1 - b) * image[y0, x1]
                             + (1 - a) * b * image[y1, x0]
                             + a * b * image[y1, x1])
    return resized.astype(np.uint8)

# Resize and store images
bilinear_images = [bilinear_resize(img, sz) for sz in sizes]
# Plot the original next to each bilinear result.
plt.figure(figsize=(15, 5))
plt.subplot(2, 4, 1)
plt.imshow(img, cmap='gray')
plt.title(f'Original ({original_w}x{original_h})')
plt.axis('off')
for slot, out in enumerate(bilinear_images, start=2):
    plt.subplot(2, 4, slot)
    rows, cols = out.shape
    plt.imshow(out, cmap='gray')
    plt.title(f'{cols}x{rows}')
    plt.axis('off')
plt.tight_layout()
plt.show()
# cv2.resize() recap: fx/fy are horizontal/vertical scale factors and
# `interpolation` selects cv2.INTER_NEAREST / INTER_LINEAR / INTER_CUBIC.
# Next: build a 6x6 synthetic grayscale test image (imgtest), display it,
# then resize it with cv2.resize() under the different interpolation modes
# and compare the results.
# Synthetic 6x6 test pattern: white ring with a black core on black.
imgtest = np.zeros((6, 6), dtype=np.uint8)   # 6x6 black canvas
imgtest[1:5, 1:5] = 255                      # white 4x4 square (rows/cols 1-4)
imgtest[2:4, 2:4] = 0                        # black 2x2 square in the middle
print("Original Test Image (6x6):\n", imgtest)
plt.imshow(imgtest, cmap='gray', vmin=0, vmax=255)
plt.title("Original 6x6 Test Image")
plt.axis('off')
plt.show()
# Upscale 10x (6x6 -> 60x60) with the three interpolation modes.
fx, fy = 10, 10
img_nn = cv2.resize(imgtest, dsize=None, fx=fx, fy=fy, interpolation=cv2.INTER_NEAREST)
img_linear = cv2.resize(imgtest, dsize=None, fx=fx, fy=fy, interpolation=cv2.INTER_LINEAR)
img_cubic = cv2.resize(imgtest, dsize=None, fx=fx, fy=fy, interpolation=cv2.INTER_CUBIC)
# Side-by-side comparison of the three interpolation modes.
panels = [
    ('Original 6x6', imgtest),
    ('Nearest Neighbor', img_nn),
    ('Bilinear', img_linear),
    ('Bicubic', img_cubic),
]
plt.figure(figsize=(12, 3))
for slot, (label, picture) in enumerate(panels, start=1):
    plt.subplot(1, 4, slot)
    plt.imshow(picture, cmap='gray', vmin=0, vmax=255)
    plt.title(label)
    plt.axis('off')
plt.tight_layout()
plt.show()
# Next: apply cv2.resize() to the real cameraman.png — scale up and down
# via fx/fy, resize to exact dimensions (rows = 250, cols = 400), and
# compare how interpolation behaves on a natural vs. a synthetic image.
# Load cameraman in grayscale. Forward slashes: the original path
# 'Images\cameraman.png' contains the invalid escape '\c'.
img_cam = cv2.imread('Images/cameraman.png', cv2.IMREAD_GRAYSCALE)
if img_cam is None:
    raise FileNotFoundError("Images/cameraman.png not found")
h, w = img_cam.shape
print(f"Original Cameraman size: {w}x{h}")
plt.imshow(img_cam, cmap='gray')
plt.title(f'Original Cameraman ({w}x{h})')
plt.axis('off')
plt.show()
# Scale-factor resizes (aspect ratio preserved).
img_up = cv2.resize(img_cam, dsize=None, fx=1.5, fy=1.5, interpolation=cv2.INTER_LINEAR)
img_down = cv2.resize(img_cam, dsize=None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)
img_down75 = cv2.resize(img_cam, dsize=None, fx=0.75, fy=0.75, interpolation=cv2.INTER_LINEAR)
# Exact-size resize: dsize is (cols, rows) = (400, 250).
img_exact = cv2.resize(img_cam, dsize=(400, 250), interpolation=cv2.INTER_LINEAR)
# Show the original plus the four cv2.resize() results, each labelled
# with its actual output dimensions.
panels = [(img_cam, f'Original\n{w}x{h}')]
for pic, label in [(img_up, 'Upscale 1.5x'),
                   (img_down, 'Downscale 0.5x'),
                   (img_down75, 'Downscale 0.75x'),
                   (img_exact, 'Exact 400x250')]:
    ph, pw = pic.shape
    panels.append((pic, f'{label}\n{pw}x{ph}'))
plt.figure(figsize=(15, 5))
for slot, (pic, label) in enumerate(panels, start=1):
    plt.subplot(1, 5, slot)
    plt.imshow(pic, cmap='gray')
    plt.title(label)
    plt.axis('off')
plt.tight_layout()
plt.show()
# Discussion prompts: how does resizing a synthetic image (imgtest)
# compare to a natural one (cameraman.png)? How do scale factors (fx, fy)
# differ from specifying exact dimensions (rows, cols)?
# Next: shift an image in different directions with cv2.warpAffine().
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load an example image (any image from the 'Images' folder works).
# Forward slashes: '\c' is an invalid escape in modern Python.
img = cv2.imread('Images/cameraman.png', cv2.IMREAD_GRAYSCALE)
if img is None:
    raise FileNotFoundError("Images/cameraman.png not found")
h, w = img.shape
print(f"Original image size: {w}x{h}")
plt.imshow(img, cmap='gray')
plt.title('Original Image')
plt.axis('off')
plt.show()

def translate_image(image, tx=0, ty=0):
    """
    Shift `image` by tx pixels horizontally and ty pixels vertically.

    Uses cv2.warpAffine with BORDER_REPLICATE so the vacated area is
    filled by replicated edge pixels instead of black borders.
    """
    rows, cols = image.shape
    # 2x3 affine matrix encoding a pure translation.
    shift = np.float32([[1, 0, tx],
                        [0, 1, ty]])
    return cv2.warpAffine(image, shift, (cols, rows),
                          borderMode=cv2.BORDER_REPLICATE)

# Horizontal, vertical and combined shifts.
img_tx = translate_image(img, tx=100, ty=0)     # 100 px right
img_ty = translate_image(img, tx=0, ty=200)     # 200 px down
img_txy = translate_image(img, tx=100, ty=200)  # 100 right, 200 down
# Display the translated variants next to the original.
shots = [
    (img, 'Original'),
    (img_tx, 'Shift 100 px right'),
    (img_ty, 'Shift 200 px down'),
    (img_txy, 'Shift 100 right & 200 down'),
]
plt.figure(figsize=(15, 4))
for slot, (pic, label) in enumerate(shots, start=1):
    plt.subplot(1, 4, slot)
    plt.imshow(pic, cmap='gray')
    plt.title(label)
    plt.axis('off')
plt.tight_layout()
plt.show()
# Challenge 1: apply a static translation, using Circle.png as the input.
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load the Circle image; IMREAD_UNCHANGED keeps a PNG alpha channel.
# Forward slashes: the original 'Images\Circle.png' contains the
# invalid escape '\C'.
img = cv2.imread('Images/Circle.png', cv2.IMREAD_UNCHANGED)
if img is None:
    raise FileNotFoundError("Images/Circle.png not found")
h, w = img.shape[:2]
# The original test `img.shape[2] == 3` raised IndexError on grayscale
# input and displayed BGRA images unconverted; branch on the actual
# channel layout instead.
if img.ndim == 3 and img.shape[2] >= 3:
    plt.imshow(cv2.cvtColor(img[:, :, :3], cv2.COLOR_BGR2RGB))
else:
    plt.imshow(img, cmap='gray')
plt.title('Original Circle Image')
plt.axis('off')
plt.show()

def translate_image(image, tx=0, ty=0):
    """
    Shift `image` by (tx, ty) pixels via warpAffine; border replication
    avoids introducing black borders.
    """
    rows, cols = image.shape[:2]
    shift = np.float32([[1, 0, tx],
                        [0, 1, ty]])
    return cv2.warpAffine(image, shift, (cols, rows),
                          borderMode=cv2.BORDER_REPLICATE)

# Number of frames
num_frames = 20
shift_per_frame = 10  # pixels per frame
plt.figure(figsize=(10, 5))
for i in range(num_frames):
    tx = i * shift_per_frame  # move right each frame; no vertical motion
    img_moved = translate_image(img, tx=tx, ty=0)
    plt.clf()
    # Branch on the actual channel layout; the original `img.shape[2]==3`
    # raised IndexError for grayscale input and showed BGRA unconverted.
    if img_moved.ndim == 3 and img_moved.shape[2] >= 3:
        plt.imshow(cv2.cvtColor(img_moved[:, :, :3], cv2.COLOR_BGR2RGB))
    else:
        plt.imshow(img_moved, cmap='gray')
    plt.title(f'Frame {i+1}: Shift {tx}px right')
    plt.axis('off')
    plt.pause(0.2)  # ~200 ms per frame
plt.show()
# Challenge 2: write the animation to disk with a cv2.VideoWriter:
#   fourcc = cv2.VideoWriter_fourcc(*'mp4v')
#   video_writer = cv2.VideoWriter(path, fourcc, 30, (width, height))
#   video_writer.write(frame); video_writer.release()
import cv2
import numpy as np

# Load Circle.png; IMREAD_UNCHANGED preserves a PNG alpha channel.
# Forward slashes: '\C' is an invalid string escape in modern Python.
img = cv2.imread('Images/Circle.png', cv2.IMREAD_UNCHANGED)
if img is None:
    raise FileNotFoundError("Images/Circle.png not found")
h, w = img.shape[:2]
# Video parameters
output_path = 'dynamic_translation.mp4'
fps = 30
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(output_path, fourcc, fps, (w, h))
# Helpers for writing frames: alpha-flatten plus wrap-around translation.
def to_bgr(img):
    """
    Flatten a BGRA image onto a black background and return 3-channel BGR.

    Images that are already 3-channel (or have no channel axis at all)
    are returned unchanged. The original `img.shape[2] == 4` test raised
    IndexError on 2-D input, so we also guard on ndim.
    """
    if img.ndim == 3 and img.shape[2] == 4:
        alpha = img[:, :, 3:4] / 255.0  # shape (H, W, 1) for broadcasting
        # Composite over black: out = fg * alpha + 0 * (1 - alpha),
        # vectorized over all three channels at once.
        bgr = img[:, :, :3].astype(np.float32) * alpha
        return bgr.astype(np.uint8)
    return img

def translate_wrap(image, tx=0, ty=0):
    """
    Translate an image with wrap-around (toroidal) borders.
    Positive tx moves right, positive ty moves down.
    """
    rows, cols = image.shape[:2]
    # Reduce shifts modulo the image size so any offset wraps cleanly.
    tx = tx % cols
    ty = ty % rows
    # Two warpAffine passes (horizontal then vertical) with BORDER_WRAP.
    Mx = np.float32([[1, 0, tx], [0, 1, 0]])
    shifted_x = cv2.warpAffine(image, Mx, (cols, rows), borderMode=cv2.BORDER_WRAP)
    My = np.float32([[1, 0, 0], [0, 1, ty]])
    return cv2.warpAffine(shifted_x, My, (cols, rows), borderMode=cv2.BORDER_WRAP)

num_frames = 120  # 4 seconds at 30 fps
# Sinewave motion parameters.
amplitude = 50                       # vertical displacement in pixels
frequency = 2 * np.pi / num_frames   # one full wave over the animation
for i in range(num_frames):
    frame_h = translate_wrap(img, tx=i * 5, ty=0)  # horizontal wrap, 5 px/frame
    frame_v = translate_wrap(img, tx=0, ty=i * 3)  # vertical wrap, 3 px/frame
    # Horizontal drift combined with a sinusoidal vertical bounce.
    frame_sine = translate_wrap(img, tx=i * 5,
                                ty=int(amplitude * np.sin(frequency * i)))
    # Only the sinewave variant is written to the video file.
    video_writer.write(to_bgr(frame_sine))
video_writer.release()
print("Video saved as dynamic_translation.mp4")
# Next: rotate an image from the images folder using
# cv2.getRotationMatrix2D() to build the rotation matrix.
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load image (any image from the 'Images' folder works). Forward slashes:
# the original 'Images\cameraman.png' contains the invalid escape '\c'.
img = cv2.imread('Images/cameraman.png', cv2.IMREAD_GRAYSCALE)
if img is None:
    raise FileNotFoundError("Images/cameraman.png not found")
h, w = img.shape
print(f"Original image size: {w}x{h}")
plt.imshow(img, cmap='gray')
plt.title('Original Image')
plt.axis('off')
plt.show()

def rotate_image(image, angle):
    """
    Rotate `image` about its center by `angle` degrees (scale kept at 1).
    Border replication fills the corners uncovered by the rotation.
    """
    rows, cols = image.shape[:2]
    pivot = (cols // 2, rows // 2)
    M = cv2.getRotationMatrix2D(pivot, angle, 1)
    return cv2.warpAffine(image, M, (cols, rows),
                          borderMode=cv2.BORDER_REPLICATE)

angles = [45, 90, 180, 270, 360]  # degrees
rotated_images = [rotate_image(img, angle) for angle in angles]
# Original followed by each rotation.
plt.figure(figsize=(15, 5))
plt.subplot(1, len(angles) + 1, 1)
plt.imshow(img, cmap='gray')
plt.title('Original')
plt.axis('off')
for slot, pic in enumerate(rotated_images, start=2):
    plt.subplot(1, len(angles) + 1, slot)
    plt.imshow(pic, cmap='gray')
    plt.title(f'{angles[slot - 2]}°')
    plt.axis('off')
plt.tight_layout()
plt.show()
# Next: rotate around a custom pivot point, again building the matrix
# with cv2.getRotationMatrix2D().
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load image
img = cv2.imread('Images/cameraman.png', cv2.IMREAD_GRAYSCALE)
if img is None:
    # Fail fast: a missing file otherwise surfaces later as an
    # AttributeError on img.shape.
    raise FileNotFoundError("Images/cameraman.png not found")
h, w = img.shape
print(f"Original image size: {w}x{h}")
plt.imshow(img, cmap='gray')
plt.title('Original Image')
plt.axis('off')
plt.show()

def rotate_image_custom_pivot(image, angle, pivot):
    """
    Rotate `image` by `angle` degrees around an arbitrary pivot point.

    Parameters:
        image: input grayscale image.
        angle: rotation angle in degrees.
        pivot: (x, y) point the rotation is centered on.
    """
    M = cv2.getRotationMatrix2D(pivot, angle, 1)  # scale = 1
    rows, cols = image.shape[:2]
    return cv2.warpAffine(image, M, (cols, rows),
                          borderMode=cv2.BORDER_REPLICATE)

angles = [45, 90]  # rotation angles
pivot_points = [(0, 0), (w // 4, h // 4)]  # top-left corner, off-center
# Grid layout: one row per pivot, one column per angle.
plt.figure(figsize=(12, 5))
cell = 1
for pivot in pivot_points:
    for angle in angles:
        plt.subplot(len(pivot_points), len(angles), cell)
        plt.imshow(rotate_image_custom_pivot(img, angle, pivot), cmap='gray')
        plt.title(f'Angle: {angle}°\nPivot: {pivot}')
        plt.axis('off')
        cell += 1
plt.tight_layout()
plt.show()
# Next: reproduce the given affine transformations (pepper.png task).
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load image
img = cv2.imread('Images/cameraman.png', cv2.IMREAD_GRAYSCALE)
if img is None:
    # Fail fast instead of an AttributeError on img.shape below.
    raise FileNotFoundError("Images/cameraman.png not found")
h, w = img.shape
print(f"Original image size: {w}x{h}")
plt.imshow(img, cmap='gray')
plt.title('Original Image')
plt.axis('off')
plt.show()
# Basic affine building blocks follow (translation, rotation, scaling, flips).
def translate(image, tx=0, ty=0):
    """Shift by (tx, ty) pixels with replicated borders."""
    M = np.float32([[1, 0, tx], [0, 1, ty]])
    rows, cols = image.shape[:2]
    return cv2.warpAffine(image, M, (cols, rows),
                          borderMode=cv2.BORDER_REPLICATE)

def rotate(image, angle):
    """Rotate about the image center (scale 1, replicated borders)."""
    rows, cols = image.shape[:2]
    M = cv2.getRotationMatrix2D((cols // 2, rows // 2), angle, 1)
    return cv2.warpAffine(image, M, (cols, rows),
                          borderMode=cv2.BORDER_REPLICATE)

def scale(image, fx=1, fy=1):
    """Resize by the given horizontal/vertical factors (bilinear)."""
    return cv2.resize(image, None, fx=fx, fy=fy,
                      interpolation=cv2.INTER_LINEAR)

def flip_horizontal(image):
    """Mirror left-right."""
    return cv2.flip(image, 1)

def flip_vertical(image):
    """Mirror top-bottom."""
    return cv2.flip(image, 0)

# Example transformations
transformed_images = {
    'Translate (100 right, 50 down)': translate(img, tx=100, ty=50),
    'Rotate 45°': rotate(img, 45),
    'Scale 1.5x': scale(img, fx=1.5, fy=1.5),
    'Flip Horizontal': flip_horizontal(img),
    'Flip Vertical': flip_vertical(img),
}
plt.figure(figsize=(18, 5))
# Original first, then each named transformation.
plt.subplot(1, len(transformed_images) + 1, 1)
plt.imshow(img, cmap='gray')
plt.title('Original')
plt.axis('off')
for slot, (label, pic) in enumerate(transformed_images.items(), start=2):
    plt.subplot(1, len(transformed_images) + 1, slot)
    plt.imshow(pic, cmap='gray')
    plt.title(label)
    plt.axis('off')
plt.tight_layout()
plt.show()
# Next: straighten leaning.jpg — analyze its tilt, then rotate it upright
# using cv2.getRotationMatrix2D() / cv2.warpAffine().
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load tilted image
img = cv2.imread('Images/leaning.jpg')
if img is None:
    raise FileNotFoundError("Images/leaning.jpg not found")
h, w = img.shape[:2]
print(f"Original image size: {w}x{h}")
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.title('Original Tilted Image')
plt.axis('off')
plt.show()
# Grayscale copy feeds the edge/Hough tilt estimation below.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150)
plt.imshow(edges, cmap='gray')
plt.title('Edges for Tilt Analysis')
plt.axis('off')
plt.show()
# Estimate the dominant tilt from Hough lines; subtracting 90° expresses
# each line angle relative to the horizontal.
lines = cv2.HoughLines(edges, 1, np.pi / 180, threshold=100)
angles = []
if lines is not None:
    for rho, theta in lines[:, 0]:
        angles.append(theta * 180 / np.pi - 90)
# Average of all detected line angles (0 when nothing was detected).
tilt_angle = np.mean(angles) if angles else 0
print(f"Estimated tilt angle: {tilt_angle:.2f}°")

def rotate_image(image, angle):
    """Rotate about the center by `angle` degrees, replicating borders."""
    rows, cols = image.shape[:2]
    M = cv2.getRotationMatrix2D((cols // 2, rows // 2), angle, 1)
    return cv2.warpAffine(image, M, (cols, rows),
                          borderMode=cv2.BORDER_REPLICATE)

# Rotate against the measured tilt to straighten the image.
corrected = rotate_image(img, -tilt_angle)
plt.figure(figsize=(12, 6))
# Before/after comparison of the alignment fix.
for slot, (pic, label) in enumerate([(img, 'Original Tilted Image'),
                                     (corrected, 'Corrected Alignment')],
                                    start=1):
    plt.subplot(1, 2, slot)
    plt.imshow(cv2.cvtColor(pic, cv2.COLOR_BGR2RGB))
    plt.title(label)
    plt.axis('off')
plt.tight_layout()
plt.show()
# Next: perspective transformations on Earth.bmp with
# cv2.getPerspectiveTransform() and cv2.warpPerspective().
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load Earth.bmp
img = cv2.imread('Images/Earth.bmp')
if img is None:
    # Fail fast instead of an AttributeError on img.shape below.
    raise FileNotFoundError("Images/Earth.bmp not found")
h, w = img.shape[:2]
print(f"Original image size: {w}x{h}")
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.title('Original Image')
plt.axis('off')
plt.show()
# NOTE: the source/destination quads were previously defined twice
# verbatim; defining them once is sufficient.
# Source points: the four corners of the original image (TL, TR, BR, BL).
src_pts = np.float32([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]])
# Destination points simulate a perspective tilt.
dst_pts = np.float32([
    [50, 50],          # top-left moves inward
    [w - 50, 0],       # top-right moves inward
    [w - 10, h - 10],  # bottom-right slightly shifted
    [10, h - 20],      # bottom-left slightly shifted
])
# 3x3 homography mapping src -> dst, rendered over the full canvas.
M = cv2.getPerspectiveTransform(src_pts, dst_pts)
warped = cv2.warpPerspective(img, M, (w, h))
# Original vs. perspective-warped result.
plt.figure(figsize=(12, 6))
for slot, (pic, label) in enumerate([(img, 'Original Image'),
                                     (warped, 'Perspective Transformed')],
                                    start=1):
    plt.subplot(1, 2, slot)
    plt.imshow(cv2.cvtColor(pic, cv2.COLOR_BGR2RGB))
    plt.title(label)
    plt.axis('off')
plt.tight_layout()
plt.show()
# Next: bird's-eye view of road.png via cv2.getPerspectiveTransform()
# and cv2.warpPerspective().
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load road.png
img = cv2.imread('Images/road.png')
if img is None:
    # Fail fast instead of an AttributeError on img.shape below.
    raise FileNotFoundError("Images/road.png not found")
h, w = img.shape[:2]
print(f"Original image size: {w}x{h}")
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.title('Original Road Image')
plt.axis('off')
plt.show()
# Lane trapezoid in the source image (tune for your road.png):
# order is top-left, top-right, bottom-right, bottom-left.
src_pts = np.float32([
    [570, 470],
    [710, 470],
    [1100, 720],
    [200, 720],
])
# Matching rectangle in the top-down (bird's-eye) view, same corner order.
dst_pts = np.float32([
    [200, 0],
    [900, 0],
    [900, 720],
    [200, 720],
])
# Homography from the lane trapezoid to the rectified rectangle.
M = cv2.getPerspectiveTransform(src_pts, dst_pts)
warped = cv2.warpPerspective(img, M, (w, h))
# Original road vs. rectified bird's-eye view.
plt.figure(figsize=(12, 6))
for slot, (pic, label) in enumerate([(img, 'Original Road Image'),
                                     (warped, "Bird's Eye View")],
                                    start=1):
    plt.subplot(1, 2, slot)
    plt.imshow(cv2.cvtColor(pic, cv2.COLOR_BGR2RGB))
    plt.title(label)
    plt.axis('off')
plt.tight_layout()
plt.show()
# Next: pixel-wise logical operations (cv2.bitwise_not / and / or / xor).
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Two 256x256 white canvases carrying black shapes for the logic ops.
h, w = 256, 256
A = np.full((h, w), 255, dtype=np.uint8)  # white background
B = np.full((h, w), 255, dtype=np.uint8)
A[90:181, 90:121] = 0    # black rectangle in A: rows 90-180, cols 90-120
B[110:151, 110:151] = 0  # black square in B: rows/cols 110-150
# Display A and B side by side.
plt.figure(figsize=(10, 5))
for slot, (pic, label) in enumerate([(A, 'Image A'), (B, 'Image B')],
                                    start=1):
    plt.subplot(1, 2, slot)
    plt.imshow(pic, cmap='gray')
    plt.title(label)
    plt.axis('off')
plt.show()
# Manual (NumPy) implementations of the logical operators on 0/255 images.
NOT_A = 255 - A                      # NOT inverts intensities
NOT_B = 255 - B
AND_AB = np.minimum(A, B)            # AND: white only where both are white
OR_AB = np.maximum(A, B)             # OR: white where either is white
XOR_AB = np.where(A == B, 0, 255)    # XOR: white only where they differ
# OpenCV equivalents for comparison.
NOT_A_cv = cv2.bitwise_not(A)
NOT_B_cv = cv2.bitwise_not(B)
AND_AB_cv = cv2.bitwise_and(A, B)
OR_AB_cv = cv2.bitwise_or(A, B)
XOR_AB_cv = cv2.bitwise_xor(A, B)
# Pair each manual result with its OpenCV counterpart, one row per op.
operations = {
    "NOT A": (NOT_A, NOT_A_cv),
    "NOT B": (NOT_B, NOT_B_cv),
    "A AND B": (AND_AB, AND_AB_cv),
    "A OR B": (OR_AB, OR_AB_cv),
    "A XOR B": (XOR_AB, XOR_AB_cv),
}
plt.figure(figsize=(15, 10))
slot = 1
for label, (by_hand, by_cv) in operations.items():
    plt.subplot(len(operations), 2, slot)
    plt.imshow(by_hand, cmap='gray')
    plt.title(f"Manual {label}")
    plt.axis('off')
    plt.subplot(len(operations), 2, slot + 1)
    plt.imshow(by_cv, cmap='gray')
    plt.title(f"OpenCV {label}")
    plt.axis('off')
    slot += 2
plt.tight_layout()
plt.show()
# Next: bitwise ops on real image pairs — Brain AND/OR BrainM,
# Before XOR After, NOT QR and NOT Jellybeans.
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Load all inputs in grayscale.
brain = cv2.imread('Images/Brain.png', cv2.IMREAD_GRAYSCALE)
brain_mask = cv2.imread('Images/BrainM.png', cv2.IMREAD_GRAYSCALE)
before = cv2.imread('Images/Before.png', cv2.IMREAD_GRAYSCALE)
after = cv2.imread('Images/After.png', cv2.IMREAD_GRAYSCALE)
qr = cv2.imread('Images/QR.png', cv2.IMREAD_GRAYSCALE)
jelly = cv2.imread('Images/Jellybeans.png', cv2.IMREAD_GRAYSCALE)
# Fail fast with a clear message: the original only printed a warning
# and then crashed inside cv2.bitwise_* with a None operand anyway.
images = [brain, brain_mask, before, after, qr, jelly]
if any(im is None for im in images):
    raise FileNotFoundError("One or more images not found. Check file paths!")
# Brain images: mask extraction (AND) and union (OR).
brain_and = cv2.bitwise_and(brain, brain_mask)
brain_or = cv2.bitwise_or(brain, brain_mask)
# Before vs After: XOR highlights changed pixels.
before_xor_after = cv2.bitwise_xor(before, after)
# NOT operations (intensity inversion).
qr_not = cv2.bitwise_not(qr)
jelly_not = cv2.bitwise_not(jelly)
# Lay out the five results on a 2x3 grid.
results = [
    (brain_and, 'Brain AND BrainM'),
    (brain_or, 'Brain OR BrainM'),
    (before_xor_after, 'Before XOR After'),
    (qr_not, 'NOT QR'),
    (jelly_not, 'NOT Jellybeans'),
]
plt.figure(figsize=(15, 10))
for slot, (pic, label) in enumerate(results, start=1):
    plt.subplot(2, 3, slot)
    plt.imshow(pic, cmap='gray')
    plt.title(label)
    plt.axis('off')
plt.tight_layout()
plt.show()